        p2m_unlock(p2m);
        return 1;
    }
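+    /*
+     * n2rwx ("log access") pages are promoted to rwx right away so the
+     * faulting instruction can be retried; we then fall through to send
+     * the event without pausing the vcpu.
+     */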
+    else if ( p2ma == p2m_access_n2rwx )
+    {
+        ASSERT(access_w || access_r || access_x);
+        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+    }
    p2m_unlock(p2m);
    /* Otherwise, check if there is a memory event listener, and send the message along */
        }
        else
        {
-            /* A listener is not required, so clear the access restrictions */
-            p2m_lock(p2m);
-            p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
-            p2m_unlock(p2m);
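+            /* n2rwx pages have already been promoted to rwx above */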
+            if ( p2ma != p2m_access_n2rwx )
+            {
+                /* A listener is not required, so clear the access restrictions */
+                p2m_lock(p2m);
+                p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+                p2m_unlock(p2m);
+            }
        return 1;
    }
    req.type = MEM_EVENT_TYPE_ACCESS;
    req.reason = MEM_EVENT_REASON_VIOLATION;
-    /* Pause the current VCPU unconditionally */
-    vcpu_pause_nosync(v);
-    req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+    /* Pause the current VCPU, unless the page was auto-promoted (n2rwx),
+     * in which case the guest keeps running */
+    if ( p2ma != p2m_access_n2rwx )
+    {
+        vcpu_pause_nosync(v);
+        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+    }
    /* Send request to mem event */
    req.gfn = gfn;
    mem_event_put_request(d, &d->mem_event->access, &req);
-    /* VCPU paused, mem event request sent */
-    return 0;
+    /* VCPU may be paused; return whether the automatic n2rwx promotion
+     * already handled the access */
+    return (p2ma == p2m_access_n2rwx);
}
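For a ring consumer, the significant change is that an access event no longer implies a paused vcpu, so a listener should carry the request's flags through to its response instead of assuming MEM_EVENT_FLAG_VCPU_PAUSED is set (the resume path below is expected to unpause only when that flag is echoed back). A minimal sketch; get_request(), put_response() and log_violation() are hypothetical helpers standing in for the consumer's ring handling:

    mem_event_request_t req;
    mem_event_response_t rsp;

    while ( get_request(ring, &req) )            /* hypothetical helper */
    {
        if ( req.type == MEM_EVENT_TYPE_ACCESS )
            log_violation(&req);                 /* hypothetical helper */

        memset(&rsp, 0, sizeof(rsp));
        rsp.vcpu_id = req.vcpu_id;
        rsp.gfn = req.gfn;
        /* Echo the flags so the hypervisor unpauses the vcpu only if we
         * actually paused it (MEM_EVENT_FLAG_VCPU_PAUSED). */
        rsp.flags = req.flags;
        put_response(ring, &rsp);                /* hypothetical helper */
    }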
void p2m_mem_access_resume(struct domain *d)
        p2m_access_wx,
        p2m_access_rwx,
        p2m_access_rx2rw,
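+        /* position must match HVMMEM_access_n2rwx in the public enum */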
+        p2m_access_n2rwx,
        p2m->default_access,
    };
    HVMMEM_access_rwx,
    HVMMEM_access_rx2rw,       /* Page starts off as r-x, but automatically
                                * changes to r-w on a write */
+    HVMMEM_access_n2rwx,      /* Log access: starts off as n, automatically
+                               * goes to rwx, generating an event without
+                               * pausing the vcpu */
    HVMMEM_access_default      /* Take the domain default */
} hvmmem_access_t;
/* Notify that a region of memory is to have specific access types */
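For illustration, a toolstack could put a range of guest pages into this logging mode roughly as follows. This is a minimal sketch, assuming the libxc wrapper xc_hvm_set_mem_access() drives this hypercall and that a mem_access listener is already attached to the domain; log_access_range() is a hypothetical helper:

    #include <xenctrl.h>

    /* Mark nr pages starting at first_pfn as "log access": the first touch
     * of each page raises a mem_event without pausing the vcpu, and the
     * page is promoted to rwx so the guest keeps running. */
    static int log_access_range(xc_interface *xch, domid_t dom,
                                uint64_t first_pfn, uint64_t nr)
    {
        return xc_hvm_set_mem_access(xch, dom, HVMMEM_access_n2rwx,
                                     first_pfn, nr);
    }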